Change shadow_direct_map_init/clean param from vcpu to domain.
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Wed, 22 Feb 2006 18:23:35 +0000 (19:23 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Wed, 22 Feb 2006 18:23:35 +0000 (19:23 +0100)
Also some cleanups.

Signed-off-by: Xin Li <xin.b.li@intel.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/shadow.c
xen/arch/x86/shadow32.c
xen/arch/x86/shadow_public.c
xen/include/asm-x86/shadow.h

index 2c7569ec6805db7db950593e88c0ce13fb096d36..b76e5328cc2f2f9fc1dd229823511f637786e034 100644 (file)
@@ -190,9 +190,11 @@ void hvm_setup_platform(struct domain* d)
 {
     struct hvm_domain *platform;
 
-    if (!(HVM_DOMAIN(current) && (current->vcpu_id == 0)))
+    if ( !HVM_DOMAIN(current) || (current->vcpu_id != 0) )
         return;
 
+    shadow_direct_map_init(d);
+
     hvm_map_io_shared_page(d);
     hvm_get_info(d);
 
@@ -200,7 +202,8 @@ void hvm_setup_platform(struct domain* d)
     pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
     register_pic_io_hook();
 
-    if ( hvm_apic_support(d) ) {
+    if ( hvm_apic_support(d) )
+    {
         spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
         hvm_vioapic_init(d);
     }
index 2c8c4a0d843e06282f455dcfc0a45004e2724294..1a085e7989a1a497b8009c77e199ee8a96af06c8 100644 (file)
@@ -797,12 +797,13 @@ void svm_relinquish_resources(struct vcpu *v)
     free_host_save_area(v->arch.hvm_svm.host_save_area);
 #endif
 
-    if (v->vcpu_id == 0) {
+    if ( v->vcpu_id == 0 )
+    {
         /* unmap IO shared page */
         struct domain *d = v->domain;
-        if (d->arch.hvm_domain.shared_page_va)
+        if ( d->arch.hvm_domain.shared_page_va )
             unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va);
-        shadow_direct_map_clean(v);
+        shadow_direct_map_clean(d);
     }
 
     destroy_vmcb(&v->arch.hvm_svm);
index 51253ee666872fca7bba7a0b776b99e7fa15e5ec..4796adb0268847831f870a09b4cb18f542f2824e 100644 (file)
@@ -444,8 +444,6 @@ void svm_do_launch(struct vcpu *v)
         printk("%s: phys_table   = %lx\n", __func__, pt);
     }
 
-    shadow_direct_map_init(v);
-
     if ( svm_paging_enabled(v) )
         vmcb->cr3 = pagetable_get_paddr(v->arch.guest_table);
     else
index f79be0277e304329c1f9e849a8c4db32a95fc607..5fee564d97659d5778d2c5af7a78660237955bd0 100644 (file)
@@ -230,7 +230,6 @@ static void vmx_do_launch(struct vcpu *v)
     error |= __vmwrite(GUEST_TR_BASE, 0);
     error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
 
-    shadow_direct_map_init(v);
     __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
     __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
 
index 9b935d4df67bb48fda39fdd8c3134b5fcf02331d..d0403161ef7b117c36c8d9b4e8493d389bd38b69 100644 (file)
@@ -81,14 +81,14 @@ void vmx_final_setup_guest(struct vcpu *v)
 void vmx_relinquish_resources(struct vcpu *v)
 {
     struct hvm_virpit *vpit;
-    
+
     if (v->vcpu_id == 0) {
         /* unmap IO shared page */
         struct domain *d = v->domain;
         if ( d->arch.hvm_domain.shared_page_va )
             unmap_domain_page_global(
                (void *)d->arch.hvm_domain.shared_page_va);
-        shadow_direct_map_clean(v);
+        shadow_direct_map_clean(d);
     }
 
     vmx_request_clear_vmcs(v);
index 166d026c0be8592df990d7c755fabcd28276f0d9..3e85cf22643b2cca61f7ecf3f19ca47d24b0c7e7 100644 (file)
@@ -3950,11 +3950,11 @@ int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
 
     __direct_get_l3e(v, vpa, &sl3e);
 
-    if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 
+    if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto nomem; 
+            goto nomem;
 
         smfn = page_to_mfn(page);
         sl3e = l3e_from_pfn(smfn, _PAGE_PRESENT);
@@ -3968,11 +3968,11 @@ int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
 
     __direct_get_l2e(v, vpa, &sl2e);
 
-    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) ) 
+    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto nomem; 
+            goto nomem;
 
         smfn = page_to_mfn(page);
         sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
@@ -3985,11 +3985,11 @@ int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
 
     __direct_get_l1e(v, vpa, &sl1e);
 
-    if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) ) 
+    if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) )
     {
         sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
         __direct_set_l1e(v, vpa, &sl1e);
-    } 
+    }
 
     shadow_unlock(d);
     return EXCRET_fault_fixed;
@@ -3998,7 +3998,7 @@ fail:
     return 0;
 
 nomem:
-    shadow_direct_map_clean(v);
+    shadow_direct_map_clean(d);
     domain_crash_synchronous();
 }
 #endif
index 5264b0359b144b93724c2f354f879702efcb0acf..916cc3c09ee35eca367565602985a26f283c8d49 100644 (file)
@@ -1044,7 +1044,7 @@ int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
     }
 
     shadow_lock(d);
-  
+
    __direct_get_l2e(v, vpa, &sl2e);
 
     if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
@@ -1059,7 +1059,7 @@ int shadow_direct_map_fault(unsigned long vpa, struct cpu_user_regs *regs)
         sple = (l1_pgentry_t *)map_domain_page(smfn);
         memset(sple, 0, PAGE_SIZE);
         __direct_set_l2e(v, vpa, sl2e);
-    } 
+    }
 
     if ( !sple )
         sple = (l1_pgentry_t *)map_domain_page(l2e_get_pfn(sl2e));
@@ -1082,36 +1082,32 @@ fail:
     return 0;
 
 nomem:
-    shadow_direct_map_clean(v);
+    shadow_direct_map_clean(d);
     domain_crash_synchronous();
 }
 
 
-int shadow_direct_map_init(struct vcpu *v)
+int shadow_direct_map_init(struct domain *d)
 {
     struct page_info *page;
     l2_pgentry_t *root;
 
     if ( !(page = alloc_domheap_page(NULL)) )
-        goto fail;
+        return 0;
 
     root = map_domain_page(page_to_mfn(page));
     memset(root, 0, PAGE_SIZE);
     unmap_domain_page(root);
 
-    v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
 
     return 1;
-
-fail:
-    return 0;
 }
 
-void shadow_direct_map_clean(struct vcpu *v)
+void shadow_direct_map_clean(struct domain *d)
 {
     int i;
     unsigned long mfn;
-    struct domain *d = v->domain;
     l2_pgentry_t *l2e;
 
     mfn =  pagetable_get_pfn(d->arch.phys_table);
@@ -1143,7 +1139,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
 
     if(!new_modes) /* Nothing to do - return success */
         return 0; 
-        
+
     // can't take anything away by calling this function.
     ASSERT(!(d->arch.shadow_mode & ~mode));
 
index 7dfe111f2f569dcc52391bc04eb938b719be3673..82aa9b002ee795aba5df839fe4a7da528763b9f0 100644 (file)
@@ -36,31 +36,27 @@ static void free_p2m_table(struct vcpu *v);
 #define SHADOW_MAX_GUEST32(_encoded) ((L1_PAGETABLE_ENTRIES_32 - 1) - ((_encoded) >> 16))
 
 
-int shadow_direct_map_init(struct vcpu *v)
+int shadow_direct_map_init(struct domain *d)
 {
     struct page_info *page;
     l3_pgentry_t *root;
 
     if ( !(page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA)) )
-        goto fail;
+        return 0;
 
     root = map_domain_page(page_to_mfn(page));
     memset(root, 0, PAGE_SIZE);
     root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
 
-    v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
 
     unmap_domain_page(root);
     return 1;
-
-fail:
-    return 0;
 }
 
-void shadow_direct_map_clean(struct vcpu *v)
+void shadow_direct_map_clean(struct domain *d)
 {
     unsigned long mfn;
-    struct domain *d = v->domain;
     l2_pgentry_t *l2e;
     l3_pgentry_t *l3e;
     int i, j;
index 6c41b9b3f1e37cf912a8d0493b99a21fe3a43373..a9e3789f8df1a0f05c77529a8ea24b2e8f4aeef1 100644 (file)
@@ -115,8 +115,8 @@ do {                                            \
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
 #define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
-extern void shadow_direct_map_clean(struct vcpu *v);
-extern int shadow_direct_map_init(struct vcpu *v);
+extern void shadow_direct_map_clean(struct domain *d);
+extern int shadow_direct_map_init(struct domain *d);
 extern int shadow_direct_map_fault(
     unsigned long vpa, struct cpu_user_regs *regs);
 extern void shadow_mode_init(void);